/* used by debug verify code */
unsigned long buf[PAGE_SIZE/sizeof(unsigned long)];
+#define MAX_PIN_BATCH 1024
+ struct mmuext_op pin[MAX_PIN_BATCH];
+ unsigned int nr_pins = 0;
+
xcio_info(ioctxt, "xc_linux_restore start\n");
if ( mlock(&ctxt, sizeof(ctxt) ) )
xcio_info(ioctxt, "Received all pages\n");
+ if ( finish_mmu_updates(xc_handle, mmu) )
+ goto out;
+
/*
* Pin page tables. Do this after writing to them as otherwise Xen
* will barf when doing the type-checking.
*/
for ( i = 0; i < nr_pfns; i++ )
{
+ if ( (pfn_type[i] & LPINTAB) == 0 )
+ continue;
if ( pfn_type[i] == (L1TAB|LPINTAB) )
+ pin[nr_pins].cmd = MMUEXT_PIN_L1_TABLE;
+ else /* pfn_type[i] == (L2TAB|LPINTAB) */
+ pin[nr_pins].cmd = MMUEXT_PIN_L2_TABLE;
+ pin[nr_pins].mfn = pfn_to_mfn_table[i];
+ if ( ++nr_pins == MAX_PIN_BATCH )
{
- if ( pin_table(xc_handle, MMUEXT_PIN_L1_TABLE,
- pfn_to_mfn_table[i], dom) ) {
- printf("ERR pin L1 pfn=%lx mfn=%lx\n",
- (unsigned long)i, pfn_to_mfn_table[i]);
- goto out;
- }
- }
- }
-
- /* must pin all L1's before L2's (need consistent va back ptr) */
- for ( i = 0; i < nr_pfns; i++ )
- {
- if ( pfn_type[i] == (L2TAB|LPINTAB) )
- {
- if ( pin_table(xc_handle, MMUEXT_PIN_L2_TABLE,
- pfn_to_mfn_table[i], dom) ) {
- printf("ERR pin L2 pfn=%lx mfn=%lx\n",
- (unsigned long)i, pfn_to_mfn_table[i]);
+ if ( do_mmuext_op(xc_handle, pin, nr_pins, dom) < 0 )
goto out;
- }
+ nr_pins = 0;
}
}
- if ( finish_mmu_updates(xc_handle, mmu) ) goto out;
+ if ( (nr_pins != 0) &&
+ (do_mmuext_op(xc_handle, pin, nr_pins, dom) < 0) )
+ goto out;
xcio_info(ioctxt, "\b\b\b\b100%%\n");
xcio_info(ioctxt, "Memory reloaded.\n");
/*
 * Pin a single page table for domain @dom.
 *
 * @type: pin operation to issue (MMUEXT_PIN_L1_TABLE or MMUEXT_PIN_L2_TABLE).
 * @mfn:  machine frame number of the page table to pin.
 *
 * Returns 0 on success, 1 on failure (matching the callers' err convention).
 */
int pin_table(
    int xc_handle, unsigned int type, unsigned long mfn, domid_t dom)
{
    /* A batch of exactly one operation, routed through the shared helper. */
    struct mmuext_op op;

    op.cmd = type;
    op.mfn = mfn;

    return (do_mmuext_op(xc_handle, &op, 1, dom) < 0) ? 1 : 0;
}
static int flush_mmu_updates(int xc_handle, mmu_t *mmu)
out1: return ret;
}
+static inline int do_mmuext_op(
+ int xc_handle,
+ struct mmuext_op *op,
+ unsigned int nr_ops,
+ domid_t dom)
+{
+ privcmd_hypercall_t hypercall;
+ long ret = -EINVAL;
+
+ hypercall.op = __HYPERVISOR_mmuext_op;
+ hypercall.arg[0] = (unsigned long)op;
+ hypercall.arg[1] = (unsigned long)nr_ops;
+ hypercall.arg[2] = (unsigned long)0;
+ hypercall.arg[3] = (unsigned long)dom;
+
+ if ( mlock(op, nr_ops*sizeof(*op)) != 0 )
+ {
+ PERROR("Could not lock memory for Xen hypercall");
+ goto out1;
+ }
+
+ if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
+ {
+ fprintf(stderr, "Dom_mem operation failed (rc=%ld errno=%d)-- need to"
+ " rebuild the user-space tool set?\n",ret,errno);
+ goto out2;
+ }
+
+ out2: (void)munlock(op, nr_ops*sizeof(*op));
+ out1: return ret;
+}
+
/*
* PFN mapping.
nx &= ~PGT_va_mask;
nx |= type; /* we know the actual type is correct */
}
- else if ( unlikely((x & PGT_va_mask) != (type & PGT_va_mask)) )
+ else if ( ((type & PGT_va_mask) != PGT_va_mutable) &&
+ ((type & PGT_va_mask) != (x & PGT_va_mask)) )
{
/* This table is potentially mapped at multiple locations. */
nx &= ~PGT_va_mask;
switch ( op.cmd )
{
case MMUEXT_PIN_L1_TABLE:
- /*
- * We insist that, if you pin an L1 page, it's the first thing that
- * you do to it. This is because we require the backptr to still be
- * mutable. This assumption seems safe.
- */
type = PGT_l1_page_table | PGT_va_mutable;
pin_page: